Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

First, I'll compute the camera calibration matrix and distortion coefficients using a set of chessboard images.

In [1]:
%%time
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib qt
nx = 9 # the number of inside corners in x
ny = 6 # the number of inside corners in y
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny*nx,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
no_img=0
img_size = 0
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx,ny),None)

    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
        cv2.imshow('img',img)
        cv2.waitKey(500)
        no_img += 1
        img_size = (gray.shape[1], gray.shape[0])
cv2.destroyAllWindows()
print("no of image processed successfully:", no_img)
print("Image processed Size:", img_size)
no of image processed successfully: 17
Image processed Size: (1280, 720)
Wall time: 39.5 s
In [2]:
import pickle

# Camera calibration given object points and image points.
# Returns the camera matrix (mtx), distortion coefficients (dist) and
# per-image rotation/translation vectors (rvecs/tvecs).
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)

# Save the camera calibration result for later use 
# so the lane-finding cells below can reload it without recalibrating.
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
dist_pickle["rvecs"] = rvecs
dist_pickle["tvecs"] = tvecs
pickle.dump( dist_pickle, open( "Project_cam_dist_pickle.p", "wb" ) )

Apply a distortion correction to raw images.

In [3]:
%matplotlib inline
# Test undistortion on an image
img = cv2.imread('camera_cal/calibration1.jpg')


dst = cv2.undistort(img, mtx, dist, None, mtx)
#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)

cv2.imwrite('output_images/calibration1_undistorted.jpg',dst)
Out[3]:
True

Code for Lane finding using camera calibration parameters

In [4]:
import pickle
import cv2
import numpy as np
import glob, os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
dist_pickle = pickle.load( open( "Project_cam_dist_pickle.p", "rb" ) )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# Still test images that all the demo cells below iterate over.
images = glob.glob('test_images/*.jpg')

Distortion correction

In [5]:
# Helper that undoes camera lens distortion on a single frame.
def un_distort(img):
    """Return img with lens distortion removed, using the saved calibration.

    The saved camera matrix is passed both as the distortion model and as
    the new camera matrix (no cropping or rescaling of the output).
    """
    return cv2.undistort(img, mtx, dist, None, mtx)
In [6]:
no_test_imgs = len(images)
# One row per test image: original on the left, undistorted on the right.
fig, axs = plt.subplots(no_test_imgs, 2, figsize=(8*2, 4*no_test_imgs))
un_dst_img=[]
axs = axs.ravel()
for i in range(no_test_imgs):
    img = cv2.imread(images[i])
    un_dst_img.append(un_distort(img))
    # np.flip(..., 2) reverses the channel axis (BGR -> RGB) for matplotlib
    axs[i*2].imshow(np.flip(img,2))
    axs[i*2+1].imshow(np.flip(un_dst_img[i],2))
    # imwrite expects BGR, so the unflipped image is written to disk
    cv2.imwrite("output_images/distortion_corrected_" + os.path.basename(images[i]), un_dst_img[i])

Color and Gradient thresholding

In [7]:
def detect_color_gradient(img, s_thresh=(170, 255), sx_thresh=(15, 100), r_thresh = (200,255)):
    """Create a binary lane-candidate mask from color and gradient thresholds.

    Parameters
    ----------
    img : 3-channel image (H, W, 3); channel 0 is treated as R.
    s_thresh : inclusive (low, high) threshold on the HLS S channel.
    sx_thresh : inclusive (low, high) threshold on the scaled Sobel-x gradient.
    r_thresh : inclusive (low, high) threshold on the R channel.

    Returns
    -------
    uint8 image with lane candidates at 255 and background at 0.
    """
    img = np.copy(img)
    r_channel = img[:,:,0]
    # Convert to HLS color space and separate the L and S channels.
    # FIX: np.float was removed in NumPy 1.24 -- use the explicit dtype.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]

    # Color thresholding: keep pixels passing either the S-channel test
    # (robust to lighting changes) or the R-channel test (bright lanes).
    color_binary = np.zeros_like(s_channel)
    color_binary[((s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])) \
            |  ((r_channel >= r_thresh[0]) & (r_channel <= r_thresh[1]))] = 1

    # Sobel gradient in x on the lightness channel: accentuates
    # near-vertical edges such as lane lines.
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    # Guard against division by zero on a featureless (all-flat) frame.
    max_grad = np.max(abs_sobelx)
    if max_grad == 0:
        max_grad = 1
    scaled_sobelx = np.uint8(255*abs_sobelx/max_grad)
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobelx)
    sxbinary[((scaled_sobelx >= sx_thresh[0]) & (scaled_sobelx <= sx_thresh[1]))] = 1

    # Union of the color and gradient masks, scaled to 0/255 for display.
    binary_img = np.zeros_like(s_channel)
    binary_img[(color_binary ==1) | (sxbinary ==1) ] = 1
    binary_img = np.uint8(binary_img* 255)

    return binary_img
    
In [8]:
%%time
## plot gradient test images: undistorted (left) vs binary mask (right)
fig, axs = plt.subplots(no_test_imgs, 2, figsize=(8*2, 4*no_test_imgs))
gradient_img=[]
axs = axs.ravel()
for i in range(no_test_imgs):
#     img = cv2.imread(images[i])
    # threshold the previously-undistorted images from the earlier cell
    gradient_img.append(detect_color_gradient(un_dst_img[i]))
    axs[i*2].imshow(np.flip(un_dst_img[i],2))
    axs[i*2+1].imshow(gradient_img[i], cmap = 'gray')
    cv2.imwrite("output_images/thresholded_binary_" + os.path.basename(images[i]), gradient_img[i])    
###### this is combination of s and r channel and soble x gradient on l-channel of HLS 
Wall time: 4.64 s

Perspective Transform

In [9]:
# Perspective-transform calibration (pixel coordinates).
# src1 marks a trapezoid on the road in the camera view; dst1 is the
# rectangle it maps to in the birds-eye view.
src1 = np.float32([ [418, 570], [272, 674], [1052, 674], [882, 570]])
dst1 = np.float32( [[320, 610], [320, 700], [1060, 700], [1060, 610]])

def prespective_transform(img, src=src1, dst=dst1):
    """Warp img to a top-down ("birds-eye") view of the road.

    Parameters
    ----------
    img : image to warp (binary or color).
    src, dst : 4-point quadrilaterals defining the mapping.

    Returns
    -------
    (warped, M, Minv): the warped image, the forward transform matrix, and
    the inverse matrix used later to project overlays back onto the camera view.
    """
    M = cv2.getPerspectiveTransform(src,dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Use the input image's own size instead of a hard-coded 1280x720 so the
    # function also works for other resolutions (shape is (rows, cols, ...)).
    out_size = (img.shape[1], img.shape[0])
    warped = cv2.warpPerspective(img, M, out_size, flags=cv2.INTER_LINEAR)
    return warped, M, Minv
    
In [10]:
%%time
# plot prespective transform: original | thresholded | warped, per test image
fig, axs = plt.subplots(no_test_imgs, 3, figsize=(16*3, 9*no_test_imgs))
warped_img=[]
# NOTE(review): M and Minv are initialized as lists but rebound to single
# matrices inside the loop -- only the last image's matrices survive.
M =[] 
Minv =[]
axs = axs.ravel()
for i in range(no_test_imgs):
#     img = cv2.imread(images[i])
    wp_img, M, Minv = prespective_transform(gradient_img[i])
    warped_img.append(wp_img)
    axs[i*3].imshow(np.flip(un_dst_img[i],2))
    axs[i*3+1].imshow(gradient_img[i], cmap = 'gray')
    axs[i*3+2].imshow(warped_img[i], cmap = 'gray')
    cv2.imwrite("output_images/warped_img_" + os.path.basename(images[i]), warped_img[i]) 
Wall time: 4.65 s
In [11]:
def skip_sliding_window(binary_warped, left_fit, right_fit):
    """Refit the lane polynomials by searching near the previous frame's fits.

    Instead of a fresh sliding-window search, only nonzero pixels within
    +/- margin of the previous polynomials are used for the new fit.

    Parameters
    ----------
    binary_warped : 2-D binary (top-down) lane image.
    left_fit, right_fit : previous 2nd-order polynomial coefficients; an
        array with shape[0] == 0 signals "no previous fit".

    Returns
    -------
    (ret1, left_fit, right_fit): ret1 is 1 when at least one lane line was
    refitted, else 0 (the fits are then returned unchanged).
    """
    if left_fit.shape[0]>0 and right_fit.shape[0]>0:
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        margin = 100  # search-corridor half width in pixels

        # Pixels within +/- margin of each previous polynomial.
        left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + 
        left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
        left_fit[1]*nonzeroy + left_fit[2] + margin))) 

        right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + 
        right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
        right_fit[1]*nonzeroy + right_fit[2] + margin)))  

        # Again, extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds] 
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]

        # Fit a second order polynomial to each side that produced pixels.
        # BUG FIX: the original condition was "len(leftx)== 0 & len(leftx)== 0",
        # which (a) used bitwise & with the wrong precedence and (b) tested
        # leftx twice instead of rightx.
        if len(leftx) == 0 and len(rightx) == 0:
            ret1 = 0
        if len(leftx) != 0:
            left_fit = np.polyfit(lefty, leftx, 2)
            ret1 = 1
        if len(rightx) != 0:
            right_fit = np.polyfit(righty, rightx, 2)
            ret1 = 1

    else:
        # No previous fits available; caller must run the full search.
        ret1 = 0

    return ret1, left_fit, right_fit
    
In [12]:
# Shared pipeline state: the latest polynomial fits plus a flag that lets
# detect_lane() reuse them via skip_sliding_window on the next frame.
# (A module-level "global" statement is a no-op but documents intent.)
global left_fit, right_fit, with_skip_sliding_window
with_skip_sliding_window = 0
# Empty (0, 3) arrays mean "no previous fit yet" (shape[0] > 0 tests fail).
left_fit = np.empty((0,3))
right_fit = np.empty((0,3))
In [13]:
def detect_lane(binary_warped, debugging_flag='False'):
    """Find lane-line pixels in a warped binary image and fit 2nd-order polynomials.

    When with_skip_sliding_window is enabled, first tries to refit near the
    previous frame's polynomials (skip_sliding_window); otherwise — or when
    that fails — runs a histogram-seeded sliding-window search from scratch.

    Parameters
    ----------
    binary_warped : 2-D binary top-down lane image.
    debugging_flag : the *string* 'True' switches to the visualization path
        (kept as a string for backward compatibility with existing callers).

    Returns
    -------
    (left_fit, right_fit) coefficient arrays normally; an annotated RGB
    debug image when debugging_flag == 'True'.
    """
    global left_fit, right_fit, with_skip_sliding_window
    ret1=0
    ## checking if lane can be detected closer to previous frames detected lanes
    if with_skip_sliding_window==1 and debugging_flag=='False':
        ret1, left_fit, right_fit = skip_sliding_window(binary_warped, left_fit, right_fit)

    if ret1==0:

        # Take a histogram of the bottom half of the image.
        # FIX: np.int was removed in NumPy 1.24 -- the builtin int is used
        # throughout this function instead.
        histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)
        # Create an output image to draw on and visualize the result
        if debugging_flag=='True':
            left_fit = np.empty((0,3))
            right_fit = np.empty((0,3))
            out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255

        # Find the peak of the left and right halves of the histogram
        # These will be the starting point for the left and right lines
        midpoint = int(histogram.shape[0]/2)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint

        # Choose the number of sliding windows
        nwindows = 9
        # Set height of windows
        window_height = int(binary_warped.shape[0]/nwindows)
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated for each window
        leftx_current = leftx_base
        rightx_current = rightx_base
        # Set the width of the windows +/- margin
        margin = 100
        # Set minimum number of pixels found to recenter window
        minpix = 50
        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = []
        right_lane_inds = []

        # Step through the windows one by one
        for window in range(nwindows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            if debugging_flag=='True':
                # Draw the windows on the visualization image
                cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) 
                cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), (0,255,0), 2) 

            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \
            (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \
            (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:        
                rightx_current = int(np.mean(nonzerox[good_right_inds]))

        # Concatenate the arrays of indices
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)

        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds] 
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds] 

        # Fit a second order polynomial to each side that found pixels;
        # an empty side keeps the previous (global) fit.
        if len(leftx)!= 0:
            left_fit = np.polyfit(lefty, leftx, 2)
        if len(rightx)!= 0:
            right_fit = np.polyfit(righty, rightx, 2)

        if debugging_flag=='False':
            return left_fit, right_fit
        else:  ## for debugging
            # Generate x and y values for plotting
            ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
            left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
            right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

            ## visualization for debugging: color lane pixels (red/blue) and
            ## draw both fitted polynomials in yellow
            out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
            out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
            for i,y in enumerate(ploty):
                out_img[int(y),int(left_fitx[i])]  = [255, 255, 0]
                out_img[int(y), int(right_fitx[i])] = [255, 255, 0]
            return out_img
    else:
        return left_fit, right_fit
In [14]:
%%time
# plot lanes detected 
columns=4
fig, axs = plt.subplots(no_test_imgs, columns, figsize=(16*columns, 9*no_test_imgs))
lanes_imgs=[]
# NOTE(review): M and Minv are initialized here but never used in this cell.
M =[] 
Minv =[]
axs = axs.ravel()
for i in range(no_test_imgs):
#  using the detect_lane in debug mode to visvualize the sliding boxes processing 
    lanes_img = detect_lane(warped_img[i], 'True')
#     lanes_imgs.append(lanes_img)
    # original | thresholded | warped | sliding-window debug view
    axs[i*columns].imshow(np.flip(un_dst_img[i],2))
    axs[i*columns+1].imshow(gradient_img[i], cmap = 'gray')
    axs[i*columns+2].imshow(warped_img[i], cmap = 'gray')
    axs[i*columns+3].imshow(lanes_img)
    cv2.imwrite("output_images/lanes_img_" + os.path.basename(images[i]), lanes_img) 
Wall time: 5.05 s
In [15]:
def calc_lane_curvature(y_len, left_fit, right_fit):
    """Return the average radius of curvature (meters) of the two lane fits.

    Parameters
    ----------
    y_len : pixel height of the warped image; the radius is evaluated at the
        bottom row (closest to the vehicle).
    left_fit, right_fit : 2nd-order polynomial coefficients in pixel space.
    """
    # Pixel-to-meter conversion factors for the warped (birds-eye) view.
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension

    ploty = np.linspace(0, y_len-1, y_len )
    # Evaluate at the largest y, i.e. nearest the vehicle's front.
    y_eval = ploty[-1]

    radii = []
    for fit in (left_fit, right_fit):
        # Sample the pixel-space polynomial, then refit in world (meter) space.
        fitx = fit[0]*ploty**2 + fit[1]*ploty + fit[2]
        fit_cr = np.polyfit(ploty*ym_per_pix, fitx*xm_per_pix, 2)
        # Standard radius-of-curvature formula for x = f(y), now in meters.
        radius = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
        radii.append(radius)

    # Report the mean of the left and right radii.
    return (radii[0] + radii[1])/2
In [16]:
def draw_lanes(undist, warped, Minv, left_fit, right_fit):
    """Project the detected lane area back onto the undistorted camera image.

    Parameters
    ----------
    undist : undistorted camera image (H, W, 3).
    warped : warped binary image the fits were computed on (height is used).
    Minv : inverse perspective matrix (birds-eye -> camera view).
    left_fit, right_fit : 2nd-order polynomial coefficients per lane line.

    Returns
    -------
    (result, lane_center): the blended overlay image, and the x position (in
    warped-image pixels) midway between the two lines at the bottom row.
    """
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Lane center at the row closest to the vehicle front.
    # BUG FIX: the original averaged left_fitx[-1] with itself, which returned
    # the LEFT line's position instead of the midpoint between both lines.
    lane_center = (left_fitx[-1] + right_fitx[-1])/2

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane polygon onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0])) 
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

    return result, lane_center
 
    
In [17]:
def pipeline_image_processing(img): 
    """Full lane-finding pipeline for a single frame.

    Steps: undistort -> color/gradient threshold -> perspective warp ->
    lane fit -> curvature/position estimates -> lane overlay plus
    picture-in-picture stage previews and text annotations.

    Returns the annotated frame (same size as the input).
    """
    # finding image size
    imshape = img.shape 
    # removing the distortion from image captured by the vehicle camera
    undistort_img = un_distort(img)
    
    
    # binary lane-candidate mask from color + Sobel-x gradient thresholds
    binary_img1 = detect_color_gradient(undistort_img, s_thresh=(170, 255), sx_thresh=(15, 100))
    
    # declaring source and destination point for warping image to get the top view of the road
#     src = np.float32([ [585, 460 ], [203, 720 ], [1127, 720], [695, 460 ]])
#     dst  =np.float32( [[320, 0 ], [320, 720 ], [960, 720], [960, 0 ]])
#     src = np.float32([ [580, 460 ], [180, 720 ], [1220, 720], [730, 460 ]])
#     dst  =np.float32( [[320, 0 ], [320, 720 ], [960, 720], [960, 0 ]])
#     src = np.float32([ [418, 570], [272, 674 ], [1052, 674], [882, 570 ]])
#     dst  =np.float32( [[320, 100 ], [320, 720 ], [960, 720], [960, 100]])
#     src = np.float32([ [418, 570], [272, 674], [1052, 674], [882, 570]])
#     dst = np.float32( [[320, 610], [320, 700], [960, 700], [960, 610]])
    src = np.float32([ [418, 570], [272, 674], [1052, 674], [882, 570]])
    dst = np.float32( [[320, 610], [320, 700], [1060, 700], [1060, 610]])
    # Warping color_gradient thresholded image to get the top view of the road 
    warped_img, M, Minv = prespective_transform(binary_img1, src, dst)
    
    
    # fit lane polynomials, then derive curvature and the painted lane overlay
    left_fit, right_fit = detect_lane(warped_img)
    avg_curverature = calc_lane_curvature(warped_img.shape[0],left_fit, right_fit)
    result, lane_center = draw_lanes(undistort_img, warped_img, Minv, left_fit, right_fit)
    
    #calculated position of vehicle deviated from the center of the lane
    # which would center of difference between 'center of lane' minus 'center of image'(denote center of vehicle) , 
    # above difference scaled by a factor 'meters per pixel' in 'x-direction'(column of image) to convert the distance in meters
    # NOTE(review): lane_center already comes back as a mid-lane x coordinate
    # in warped pixel space, so adding src[1][0] (= 272) before differencing
    # with the image center looks suspect -- confirm the intended offset model.
    vehicle_position = ((imshape[1]/2)- (src[1][0]+lane_center))*(3.7/700)
        
    # Overlay pipline stages as quarter-size thumbnails along the top edge
    overlay_col_size = int(imshape[1]/4)
    overlay_row_size = int(imshape[0]/4)
    # resizing to fit smaller window
    undistort_img_resized = cv2.resize(undistort_img, (overlay_col_size, overlay_row_size ))
    binary_img1_resized = cv2.resize(binary_img1, (overlay_col_size, overlay_row_size ))
    warped_img_resized = cv2.resize(warped_img, (overlay_col_size, overlay_row_size ))
    
    # adding Undistorted to final output, with image label
    cv2.putText(result, "Undistorted image", (25, 20),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    result[30:overlay_row_size + 30, 10:overlay_col_size +10 ]  \
    = undistort_img_resized          
    
    
    # adding Gradient to final output, with image label
    # (single-channel mask converted to 3 channels so it can be pasted in)
    cv2.putText(result, "Gradient", (overlay_col_size + 35, 20),\
                cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    result[30:overlay_row_size + 30,  overlay_col_size + 20: 2*overlay_col_size +20 ]  \
    = cv2.cvtColor(binary_img1_resized, cv2.COLOR_GRAY2RGB)


    # adding Warped to final output, with image label
    cv2.putText(result, "Warped image", (2*overlay_col_size +45, 20),\
            cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    result[30:overlay_row_size + 30, 2*overlay_col_size +30: 3*overlay_col_size +30 ]  \
    = cv2.cvtColor(warped_img_resized, cv2.COLOR_GRAY2RGB)
    
    # adding Radius of curvature to final output
    cv2.putText(result, "Radius of curvature: {0:.0f} m".format(avg_curverature), (int((result.shape[1])*0.75)+40, 50),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    # adding Position to final output
    cv2.putText(result, "Position: {0:.3f} m".format(vehicle_position), (int((result.shape[1])*0.75)+40, 75),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    return result
    
In [19]:
%%time
# Run the complete pipeline over every still test image:
# original on the left, fully annotated result on the right.
no_test_imgs = len(images)
fig, axs = plt.subplots(no_test_imgs, 2, figsize=(16*2, 9*no_test_imgs))

axs = axs.ravel()
for i in range(no_test_imgs):
    img = cv2.imread(images[i])
    final_img = pipeline_image_processing(img)
    # np.flip(..., 2) converts BGR -> RGB for matplotlib display
    axs[i*2].imshow(np.flip(img,2))
    axs[i*2+1].imshow(np.flip(final_img,2))
    cv2.imwrite("output_images/pipeline_processed_" + os.path.basename(images[i]), final_img) 
Wall time: 9.35 s
In [ ]:
# fig.savefig('output_images/pipeline_processed.jpg', dpi=100)

Video Processing

In [20]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
In [21]:
# Reset the shared pipeline state before processing the video.
global with_skip_sliding_window
with_skip_sliding_window = 0

vid_output = 'project_video_output.mp4'
# clip1 = VideoFileClip("project_video.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
# Apply the single-frame pipeline to every frame of the clip.
# NOTE(review): moviepy hands RGB frames to fl_image, while the still-image
# cells fed BGR from cv2.imread -- confirm the channel-order assumption.
output_clip = clip1.fl_image(pipeline_image_processing) #NOTE: this function expects color images!!

%time output_clip.write_videofile(vid_output, audio=False)
[MoviePy] >>>> Building video project_video_output.mp4
[MoviePy] Writing video project_video_output.mp4
100%|█████████████████████████████████████▉| 1260/1261 [05:03<00:00,  4.44it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_video_output.mp4 

Wall time: 5min 6s
In [22]:
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(vid_output))
Out[22]: